Loading libraries¶

In [ ]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import tensorflow as tf
import seaborn as sns
sns.set()
from PIL import Image
import cv2
import os
import scipy as sp
import IPython
from tensorflow.keras.utils import to_categorical
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.optimizers import Adam
from sklearn.metrics import mean_absolute_error, confusion_matrix, classification_report
from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, LeakyReLU, MaxPooling2D, Flatten, Dense, Dropout
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow import keras
from keras.models import Model
from tensorflow.keras import layers
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.models import clone_model
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import plot_model
from tensorflow.keras import regularizers
from tensorflow.keras import initializers
from tensorflow.keras.models import load_model

Mounting drive

In [ ]:
from google.colab import drive
drive.mount('/content/drive',force_remount=True)
Mounted at /content/drive

Reading masked data¶

In [ ]:
#Loading the mask superimposed UTK dataset
df_final = pd.read_csv('/content/drive/MyDrive/OCCLUDED.csv')
In [ ]:
df_final.head()
Out[ ]:
age gender race pixels
0 49 1 4 250 237 222 205 185 167 178 151 128 171 137 10...
1 50 0 0 102 85 59 93 75 50 72 54 32 74 53 34 103 72 45...
2 50 0 0 97 62 36 109 72 46 135 102 80 189 165 148 242 ...
3 50 0 0 59 107 131 62 104 122 69 102 112 80 108 112 13...
4 50 0 0 0 9 6 1 5 11 1 1 10 13 10 16 25 24 21 33 26 15...
In [ ]:
#labels of ethncity
df_final=df_final.loc[(df_final.race==0 ) | ( df_final.race==1) | ( df_final.race==2) | ( df_final.race==3) | (df_final.race==4)]
In [ ]:
#0 - represents white, 1 - represents black,2 - represents asian,3 - represents indian and 4 - represents others
df_final.race.value_counts()
Out[ ]:
0    8815
1    4109
3    3468
2    2573
4    1402
Name: race, dtype: int64
In [ ]:
# 0 represents male and 1 represents female
df_final.gender.value_counts()
Out[ ]:
0    10492
1     9875
Name: gender, dtype: int64
In [ ]:
#Getting the number of people belonging to each age
df_final.age.value_counts()
Out[ ]:
26     2095
28      893
35      866
24      833
25      714
       ... 
116       3
101       2
91        2
111       1
68        1
Name: age, Length: 95, dtype: int64
In [ ]:
df_final['pixels'] = df_final['pixels'].apply(lambda x: np.array(x.split(), dtype="float32"))
In [ ]:
# normalizing pixels data
df_final['pixels'] = df_final['pixels'].apply(lambda x: x/255)
df_final = df_final.sample(frac=1).reset_index(drop=True)
In [ ]:
X = np.array(df_final['pixels'].tolist())
df_final
Out[ ]:
age gender race pixels
0 26 1 3 [0.2901961, 0.28627452, 0.27058825, 0.15294118...
1 26 1 0 [0.9372549, 0.93333334, 0.9882353, 0.9372549, ...
2 26 0 0 [0.11372549, 0.07450981, 0.06666667, 0.0784313...
3 38 0 0 [0.3254902, 0.2627451, 0.2, 0.30588236, 0.2549...
4 12 1 0 [0.16862746, 0.1764706, 0.15686275, 0.16862746...
... ... ... ... ...
20362 52 0 3 [0.91764706, 0.91764706, 0.92156863, 0.6823529...
20363 27 0 1 [0.68235296, 0.43529412, 0.3764706, 0.7411765,...
20364 35 1 1 [0.011764706, 0.011764706, 0.011764706, 0.0117...
20365 31 1 1 [0.05882353, 0.023529412, 0.03529412, 0.054901...
20366 30 0 1 [0.827451, 0.827451, 0.80784315, 0.7137255, 0....

20367 rows × 4 columns

In [ ]:
X = X.reshape(X.shape[0],50,50,3)
In [ ]:
y_new = np.array(df_final[['gender', 'race', 'age']])
print(y_new)
[[ 1  3 26]
 [ 1  0 26]
 [ 0  0 26]
 ...
 [ 1  1 35]
 [ 1  1 31]
 [ 0  1 30]]
In [ ]:
# Splitting the data
# Training-60%, Validation-20%, Testing-20%
X_train, X_test, y_train, y_test = train_test_split(X, y_new, test_size=0.2, random_state=42)
X_train, X_cv, y_train, y_cv = train_test_split(X_train,y_train,test_size = 0.25,train_size =0.75,random_state=42)
In [ ]:
# Segregating the labels into different arrays
y_gender_train = y_train[:,0]
y_gender_test = y_test[:,0]
y_gender_cv = y_cv[:,0]
y_ethnicity_train = y_train[:,1]
y_ethnicity_test = y_test[:,1]
y_ethnicity_cv = y_cv[:,1]
y_age_train = y_train[:,2]
y_age_test = y_test[:,2]
y_age_cv = y_cv[:,2]
In [ ]:
#Finding the position to slice
eth_train_len = len(y_ethnicity_train)
eth_cv_len=len(y_ethnicity_cv)
y_ethnicity_concat = np.concatenate((y_ethnicity_train,y_ethnicity_cv, y_ethnicity_test))
y_ethnicity_concat = y_ethnicity_concat.astype(np.uint8)
y_ethnicity = to_categorical(y_ethnicity_concat)

#One hot encoding- For example if a person is of ethnicity 'Indian' - 3, we one hot encode it so that it is saves as [0 0 0 1 0]. The '1' in the third position indicates that the ethnicity of the person in the image is 'Indian'.

One hot encoding- For example if a person is of ethnicity 'Indian' - 3, we one hot encode it so that it is saves as [0 0 0 1 0]. The '1' in the third position indicates that the ethnicity of the person in the image is 'Indian'.

In [ ]:
y_ethnicity_train = y_ethnicity[:eth_train_len]
y_ethnicity_cv=y_ethnicity[eth_train_len:(eth_train_len+eth_cv_len)]
y_ethnicity_test = y_ethnicity[(eth_train_len+eth_cv_len):]

RESNET Model¶

In [ ]:
#MODEL ARCHITECTURE
inputs = tf.keras.Input(shape=(50, 50, 3))
x = inputs

#Architecture of model
x = Conv2D(160, kernel_size=(3,3),padding='same')(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.LeakyReLU(0.3)(x)

x = tf.keras.layers.MaxPool2D()(x)

x = Conv2D(192, kernel_size=(3,3),padding='same')(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.LeakyReLU(0.3)(x)

x = tf.keras.layers.AvgPool2D()(x)

x3 = Conv2D(224, kernel_size=(3,3),padding='same')(x)

x3 = tf.keras.layers.BatchNormalization()(x3)
x3 = tf.keras.layers.LeakyReLU(0.1)(x3)
x3 = tf.keras.layers.AvgPool2D()(x3)

x2 = Conv2D(224, kernel_size=(3,3),padding='same')(x3)

x2 = tf.keras.layers.BatchNormalization()(x2)
x2 = tf.keras.layers.LeakyReLU(0.1)(x2)
x2 = tf.keras.layers.MaxPool2D()(x2)

x1 = Conv2D(224, kernel_size=(3,3),padding='same')(x2)

x1 = tf.keras.layers.BatchNormalization()(x1)
x1 = tf.keras.layers.LeakyReLU(0.1)(x1)
x1 = tf.keras.layers.MaxPool2D()(x1)

x1 = layers.Flatten()(x1)
x1 = layers.Dense(896,activation='relu')(x1)    
x1 = tf.keras.layers.Dropout(0.5)(x1)           
x1 = layers.Dense(896,activation='relu')(x1)

x2 = layers.Flatten()(x2)
x2 = layers.Dense(896,activation='relu')(x2)    
x2 = tf.keras.layers.Dropout(0.5)(x2)           
x2 = layers.Dense(896,activation='relu')(x2)

x3 = layers.Flatten()(x3)
x3 = layers.Dense(896,activation='relu')(x3)    
x3 = tf.keras.layers.Dropout(0.5)(x3)           
x3 = layers.Dense(896,activation='relu')(x3)


#bottleneck
out_gender = layers.Dense(1, activation='sigmoid', name='gender_out')(x2) ## output binary [0 or 1]
out_ethnicity = layers.Dense(5, activation='softmax', name='ethnicity_out')(x1) ## output categorical Example: [0,1,0,0,0]
out_age=layers.Dense(1, name='age_out')(x3) ## output continuous [1,2.....106] 


model = tf.keras.Model(inputs=inputs, outputs=[out_gender, out_ethnicity, out_age])

##Compiling Model
model.compile(
      optimizer='adam',
        loss={'gender_out':'BinaryCrossentropy',
              'ethnicity_out':'categorical_crossentropy',
              'age_out':'mse'},
        metrics={'gender_out':'accuracy',
                 'ethnicity_out':'accuracy',
                 'age_out':'mae'})

#Summarizing Model
model.summary()
Model: "model"
__________________________________________________________________________________________________
 Layer (type)                   Output Shape         Param #     Connected to                     
==================================================================================================
 input_1 (InputLayer)           [(None, 50, 50, 3)]  0           []                               
                                                                                                  
 conv2d (Conv2D)                (None, 50, 50, 160)  4480        ['input_1[0][0]']                
                                                                                                  
 batch_normalization (BatchNorm  (None, 50, 50, 160)  640        ['conv2d[0][0]']                 
 alization)                                                                                       
                                                                                                  
 leaky_re_lu (LeakyReLU)        (None, 50, 50, 160)  0           ['batch_normalization[0][0]']    
                                                                                                  
 max_pooling2d (MaxPooling2D)   (None, 25, 25, 160)  0           ['leaky_re_lu[0][0]']            
                                                                                                  
 conv2d_1 (Conv2D)              (None, 25, 25, 192)  276672      ['max_pooling2d[0][0]']          
                                                                                                  
 batch_normalization_1 (BatchNo  (None, 25, 25, 192)  768        ['conv2d_1[0][0]']               
 rmalization)                                                                                     
                                                                                                  
 leaky_re_lu_1 (LeakyReLU)      (None, 25, 25, 192)  0           ['batch_normalization_1[0][0]']  
                                                                                                  
 average_pooling2d (AveragePool  (None, 12, 12, 192)  0          ['leaky_re_lu_1[0][0]']          
 ing2D)                                                                                           
                                                                                                  
 conv2d_2 (Conv2D)              (None, 12, 12, 224)  387296      ['average_pooling2d[0][0]']      
                                                                                                  
 batch_normalization_2 (BatchNo  (None, 12, 12, 224)  896        ['conv2d_2[0][0]']               
 rmalization)                                                                                     
                                                                                                  
 leaky_re_lu_2 (LeakyReLU)      (None, 12, 12, 224)  0           ['batch_normalization_2[0][0]']  
                                                                                                  
 average_pooling2d_1 (AveragePo  (None, 6, 6, 224)   0           ['leaky_re_lu_2[0][0]']          
 oling2D)                                                                                         
                                                                                                  
 conv2d_3 (Conv2D)              (None, 6, 6, 224)    451808      ['average_pooling2d_1[0][0]']    
                                                                                                  
 batch_normalization_3 (BatchNo  (None, 6, 6, 224)   896         ['conv2d_3[0][0]']               
 rmalization)                                                                                     
                                                                                                  
 leaky_re_lu_3 (LeakyReLU)      (None, 6, 6, 224)    0           ['batch_normalization_3[0][0]']  
                                                                                                  
 max_pooling2d_1 (MaxPooling2D)  (None, 3, 3, 224)   0           ['leaky_re_lu_3[0][0]']          
                                                                                                  
 conv2d_4 (Conv2D)              (None, 3, 3, 224)    451808      ['max_pooling2d_1[0][0]']        
                                                                                                  
 batch_normalization_4 (BatchNo  (None, 3, 3, 224)   896         ['conv2d_4[0][0]']               
 rmalization)                                                                                     
                                                                                                  
 leaky_re_lu_4 (LeakyReLU)      (None, 3, 3, 224)    0           ['batch_normalization_4[0][0]']  
                                                                                                  
 max_pooling2d_2 (MaxPooling2D)  (None, 1, 1, 224)   0           ['leaky_re_lu_4[0][0]']          
                                                                                                  
 flatten_1 (Flatten)            (None, 2016)         0           ['max_pooling2d_1[0][0]']        
                                                                                                  
 flatten (Flatten)              (None, 224)          0           ['max_pooling2d_2[0][0]']        
                                                                                                  
 flatten_2 (Flatten)            (None, 8064)         0           ['average_pooling2d_1[0][0]']    
                                                                                                  
 dense_2 (Dense)                (None, 896)          1807232     ['flatten_1[0][0]']              
                                                                                                  
 dense (Dense)                  (None, 896)          201600      ['flatten[0][0]']                
                                                                                                  
 dense_4 (Dense)                (None, 896)          7226240     ['flatten_2[0][0]']              
                                                                                                  
 dropout_1 (Dropout)            (None, 896)          0           ['dense_2[0][0]']                
                                                                                                  
 dropout (Dropout)              (None, 896)          0           ['dense[0][0]']                  
                                                                                                  
 dropout_2 (Dropout)            (None, 896)          0           ['dense_4[0][0]']                
                                                                                                  
 dense_3 (Dense)                (None, 896)          803712      ['dropout_1[0][0]']              
                                                                                                  
 dense_1 (Dense)                (None, 896)          803712      ['dropout[0][0]']                
                                                                                                  
 dense_5 (Dense)                (None, 896)          803712      ['dropout_2[0][0]']              
                                                                                                  
 gender_out (Dense)             (None, 1)            897         ['dense_3[0][0]']                
                                                                                                  
 ethnicity_out (Dense)          (None, 5)            4485        ['dense_1[0][0]']                
                                                                                                  
 age_out (Dense)                (None, 1)            897         ['dense_5[0][0]']                
                                                                                                  
==================================================================================================
Total params: 13,228,647
Trainable params: 13,226,599
Non-trainable params: 2,048
__________________________________________________________________________________________________

TRAINING¶

In [ ]:
##Defining batch size and callbacks
batch_size = 8
epochs = 39
In [ ]:
#Training model
history = model.fit(X_train, {'gender_out': y_gender_train, 'ethnicity_out': y_ethnicity_train, 'age_out': y_age_train},
                         batch_size=batch_size,
                         epochs = epochs, validation_data = (X_cv, [y_gender_cv, y_ethnicity_cv, y_age_cv]),
                         steps_per_epoch=(X_train.shape[0] // batch_size)
                         )
Epoch 1/39
1527/1527 [==============================] - 26s 13ms/step - loss: 320.8661 - gender_out_loss: 0.6939 - ethnicity_out_loss: 1.3735 - age_out_loss: 318.7982 - gender_out_accuracy: 0.6207 - ethnicity_out_accuracy: 0.4632 - age_out_mae: 13.3500 - val_loss: 408.0410 - val_gender_out_loss: 0.5624 - val_ethnicity_out_loss: 1.3372 - val_age_out_loss: 406.1412 - val_gender_out_accuracy: 0.7143 - val_ethnicity_out_accuracy: 0.4487 - val_age_out_mae: 15.3153
Epoch 2/39
1527/1527 [==============================] - 20s 13ms/step - loss: 251.6921 - gender_out_loss: 0.5655 - ethnicity_out_loss: 1.2648 - age_out_loss: 249.8616 - gender_out_accuracy: 0.7162 - ethnicity_out_accuracy: 0.5124 - age_out_mae: 11.8900 - val_loss: 247.5512 - val_gender_out_loss: 0.5154 - val_ethnicity_out_loss: 1.2302 - val_age_out_loss: 245.8058 - val_gender_out_accuracy: 0.7734 - val_ethnicity_out_accuracy: 0.5245 - val_age_out_mae: 11.6918
Epoch 3/39
1527/1527 [==============================] - 20s 13ms/step - loss: 221.2417 - gender_out_loss: 0.5072 - ethnicity_out_loss: 1.1739 - age_out_loss: 219.5603 - gender_out_accuracy: 0.7589 - ethnicity_out_accuracy: 0.5570 - age_out_mae: 11.0869 - val_loss: 258.1390 - val_gender_out_loss: 0.4911 - val_ethnicity_out_loss: 1.1798 - val_age_out_loss: 256.4683 - val_gender_out_accuracy: 0.8230 - val_ethnicity_out_accuracy: 0.5594 - val_age_out_mae: 12.2683
Epoch 4/39
1527/1527 [==============================] - 20s 13ms/step - loss: 201.9085 - gender_out_loss: 0.4762 - ethnicity_out_loss: 1.0879 - age_out_loss: 200.3448 - gender_out_accuracy: 0.7800 - ethnicity_out_accuracy: 0.6031 - age_out_mae: 10.6347 - val_loss: 190.7379 - val_gender_out_loss: 0.4846 - val_ethnicity_out_loss: 1.1415 - val_age_out_loss: 189.1117 - val_gender_out_accuracy: 0.8272 - val_ethnicity_out_accuracy: 0.5601 - val_age_out_mae: 10.4899
Epoch 5/39
1527/1527 [==============================] - 20s 13ms/step - loss: 180.5287 - gender_out_loss: 0.4694 - ethnicity_out_loss: 1.0303 - age_out_loss: 179.0290 - gender_out_accuracy: 0.7834 - ethnicity_out_accuracy: 0.6266 - age_out_mae: 10.0724 - val_loss: 406.3299 - val_gender_out_loss: 0.5048 - val_ethnicity_out_loss: 1.3595 - val_age_out_loss: 404.4657 - val_gender_out_accuracy: 0.8154 - val_ethnicity_out_accuracy: 0.5216 - val_age_out_mae: 15.4159
Epoch 6/39
1527/1527 [==============================] - 20s 13ms/step - loss: 167.4009 - gender_out_loss: 0.4499 - ethnicity_out_loss: 0.9818 - age_out_loss: 165.9691 - gender_out_accuracy: 0.7964 - ethnicity_out_accuracy: 0.6504 - age_out_mae: 9.7277 - val_loss: 462.9222 - val_gender_out_loss: 0.4324 - val_ethnicity_out_loss: 1.0309 - val_age_out_loss: 461.4586 - val_gender_out_accuracy: 0.8419 - val_ethnicity_out_accuracy: 0.6242 - val_age_out_mae: 17.6381
Epoch 7/39
1527/1527 [==============================] - 20s 13ms/step - loss: 155.5247 - gender_out_loss: 0.4380 - ethnicity_out_loss: 0.9303 - age_out_loss: 154.1564 - gender_out_accuracy: 0.8011 - ethnicity_out_accuracy: 0.6748 - age_out_mae: 9.3992 - val_loss: 309.2920 - val_gender_out_loss: 0.4381 - val_ethnicity_out_loss: 0.9582 - val_age_out_loss: 307.8957 - val_gender_out_accuracy: 0.8262 - val_ethnicity_out_accuracy: 0.6583 - val_age_out_mae: 14.1514
Epoch 8/39
1527/1527 [==============================] - 20s 13ms/step - loss: 147.8836 - gender_out_loss: 0.4289 - ethnicity_out_loss: 0.8952 - age_out_loss: 146.5594 - gender_out_accuracy: 0.8071 - ethnicity_out_accuracy: 0.6824 - age_out_mae: 9.1270 - val_loss: 458.1919 - val_gender_out_loss: 0.4722 - val_ethnicity_out_loss: 0.9874 - val_age_out_loss: 456.7324 - val_gender_out_accuracy: 0.7815 - val_ethnicity_out_accuracy: 0.6443 - val_age_out_mae: 17.5140
Epoch 9/39
1527/1527 [==============================] - 20s 13ms/step - loss: 139.5304 - gender_out_loss: 0.4141 - ethnicity_out_loss: 0.8591 - age_out_loss: 138.2572 - gender_out_accuracy: 0.8152 - ethnicity_out_accuracy: 0.7012 - age_out_mae: 8.8867 - val_loss: 449.9211 - val_gender_out_loss: 0.4100 - val_ethnicity_out_loss: 0.9475 - val_age_out_loss: 448.5633 - val_gender_out_accuracy: 0.8304 - val_ethnicity_out_accuracy: 0.6622 - val_age_out_mae: 17.2776
Epoch 10/39
1527/1527 [==============================] - 20s 13ms/step - loss: 135.0754 - gender_out_loss: 0.4029 - ethnicity_out_loss: 0.8162 - age_out_loss: 133.8564 - gender_out_accuracy: 0.8227 - ethnicity_out_accuracy: 0.7164 - age_out_mae: 8.7518 - val_loss: 354.5110 - val_gender_out_loss: 0.3528 - val_ethnicity_out_loss: 0.9390 - val_age_out_loss: 353.2193 - val_gender_out_accuracy: 0.8711 - val_ethnicity_out_accuracy: 0.6787 - val_age_out_mae: 15.1731
Epoch 11/39
1527/1527 [==============================] - 20s 13ms/step - loss: 129.7506 - gender_out_loss: 0.3920 - ethnicity_out_loss: 0.7730 - age_out_loss: 128.5855 - gender_out_accuracy: 0.8254 - ethnicity_out_accuracy: 0.7307 - age_out_mae: 8.6085 - val_loss: 262.0480 - val_gender_out_loss: 0.3414 - val_ethnicity_out_loss: 1.0543 - val_age_out_loss: 260.6524 - val_gender_out_accuracy: 0.8500 - val_ethnicity_out_accuracy: 0.6679 - val_age_out_mae: 12.8855
Epoch 12/39
1527/1527 [==============================] - 20s 13ms/step - loss: 126.6115 - gender_out_loss: 0.3825 - ethnicity_out_loss: 0.7317 - age_out_loss: 125.4974 - gender_out_accuracy: 0.8334 - ethnicity_out_accuracy: 0.7490 - age_out_mae: 8.4615 - val_loss: 367.3835 - val_gender_out_loss: 0.4451 - val_ethnicity_out_loss: 1.1861 - val_age_out_loss: 365.7524 - val_gender_out_accuracy: 0.8238 - val_ethnicity_out_accuracy: 0.5891 - val_age_out_mae: 15.5960
Epoch 13/39
1527/1527 [==============================] - 20s 13ms/step - loss: 119.6997 - gender_out_loss: 0.3773 - ethnicity_out_loss: 0.6871 - age_out_loss: 118.6354 - gender_out_accuracy: 0.8357 - ethnicity_out_accuracy: 0.7634 - age_out_mae: 8.2405 - val_loss: 223.2485 - val_gender_out_loss: 0.3413 - val_ethnicity_out_loss: 1.0380 - val_age_out_loss: 221.8691 - val_gender_out_accuracy: 0.8692 - val_ethnicity_out_accuracy: 0.6632 - val_age_out_mae: 11.7104
Epoch 14/39
1527/1527 [==============================] - 20s 13ms/step - loss: 114.5749 - gender_out_loss: 0.3574 - ethnicity_out_loss: 0.6349 - age_out_loss: 113.5827 - gender_out_accuracy: 0.8434 - ethnicity_out_accuracy: 0.7772 - age_out_mae: 8.0440 - val_loss: 263.9789 - val_gender_out_loss: 0.3520 - val_ethnicity_out_loss: 0.9826 - val_age_out_loss: 262.6441 - val_gender_out_accuracy: 0.8652 - val_ethnicity_out_accuracy: 0.6750 - val_age_out_mae: 13.0280
Epoch 15/39
1527/1527 [==============================] - 20s 13ms/step - loss: 109.7279 - gender_out_loss: 0.3376 - ethnicity_out_loss: 0.5925 - age_out_loss: 108.7979 - gender_out_accuracy: 0.8510 - ethnicity_out_accuracy: 0.7921 - age_out_mae: 7.9085 - val_loss: 326.4453 - val_gender_out_loss: 0.3450 - val_ethnicity_out_loss: 0.9666 - val_age_out_loss: 325.1337 - val_gender_out_accuracy: 0.8733 - val_ethnicity_out_accuracy: 0.6816 - val_age_out_mae: 14.6251
Epoch 16/39
1527/1527 [==============================] - 20s 13ms/step - loss: 106.4306 - gender_out_loss: 0.2984 - ethnicity_out_loss: 0.5407 - age_out_loss: 105.5916 - gender_out_accuracy: 0.8749 - ethnicity_out_accuracy: 0.8113 - age_out_mae: 7.8045 - val_loss: 353.0080 - val_gender_out_loss: 0.3431 - val_ethnicity_out_loss: 1.0308 - val_age_out_loss: 351.6343 - val_gender_out_accuracy: 0.8719 - val_ethnicity_out_accuracy: 0.6684 - val_age_out_mae: 15.3910
Epoch 17/39
1527/1527 [==============================] - 20s 13ms/step - loss: 101.4483 - gender_out_loss: 0.2794 - ethnicity_out_loss: 0.4959 - age_out_loss: 100.6730 - gender_out_accuracy: 0.8884 - ethnicity_out_accuracy: 0.8256 - age_out_mae: 7.5957 - val_loss: 292.6730 - val_gender_out_loss: 0.3262 - val_ethnicity_out_loss: 1.1794 - val_age_out_loss: 291.1674 - val_gender_out_accuracy: 0.8709 - val_ethnicity_out_accuracy: 0.6598 - val_age_out_mae: 13.5556
Epoch 18/39
1527/1527 [==============================] - 20s 13ms/step - loss: 96.0783 - gender_out_loss: 0.2618 - ethnicity_out_loss: 0.4438 - age_out_loss: 95.3726 - gender_out_accuracy: 0.8939 - ethnicity_out_accuracy: 0.8417 - age_out_mae: 7.4082 - val_loss: 416.6160 - val_gender_out_loss: 0.3167 - val_ethnicity_out_loss: 1.0833 - val_age_out_loss: 415.2160 - val_gender_out_accuracy: 0.8778 - val_ethnicity_out_accuracy: 0.6632 - val_age_out_mae: 16.8134
Epoch 19/39
1527/1527 [==============================] - 20s 13ms/step - loss: 95.5168 - gender_out_loss: 0.2549 - ethnicity_out_loss: 0.4036 - age_out_loss: 94.8583 - gender_out_accuracy: 0.8976 - ethnicity_out_accuracy: 0.8600 - age_out_mae: 7.3552 - val_loss: 322.6083 - val_gender_out_loss: 0.3296 - val_ethnicity_out_loss: 1.1488 - val_age_out_loss: 321.1298 - val_gender_out_accuracy: 0.8800 - val_ethnicity_out_accuracy: 0.6706 - val_age_out_mae: 14.6408
Epoch 20/39
1527/1527 [==============================] - 20s 13ms/step - loss: 91.2605 - gender_out_loss: 0.2415 - ethnicity_out_loss: 0.3647 - age_out_loss: 90.6544 - gender_out_accuracy: 0.9071 - ethnicity_out_accuracy: 0.8722 - age_out_mae: 7.1920 - val_loss: 345.2632 - val_gender_out_loss: 0.3036 - val_ethnicity_out_loss: 1.2757 - val_age_out_loss: 343.6837 - val_gender_out_accuracy: 0.8800 - val_ethnicity_out_accuracy: 0.6669 - val_age_out_mae: 15.2395
Epoch 21/39
1527/1527 [==============================] - 20s 13ms/step - loss: 86.3991 - gender_out_loss: 0.2376 - ethnicity_out_loss: 0.3223 - age_out_loss: 85.8391 - gender_out_accuracy: 0.9081 - ethnicity_out_accuracy: 0.8881 - age_out_mae: 7.0154 - val_loss: 293.6187 - val_gender_out_loss: 0.3040 - val_ethnicity_out_loss: 1.3848 - val_age_out_loss: 291.9298 - val_gender_out_accuracy: 0.8824 - val_ethnicity_out_accuracy: 0.6544 - val_age_out_mae: 13.8927
Epoch 22/39
1527/1527 [==============================] - 20s 13ms/step - loss: 82.9990 - gender_out_loss: 0.2202 - ethnicity_out_loss: 0.2855 - age_out_loss: 82.4934 - gender_out_accuracy: 0.9129 - ethnicity_out_accuracy: 0.8993 - age_out_mae: 6.8596 - val_loss: 309.1644 - val_gender_out_loss: 0.3127 - val_ethnicity_out_loss: 1.7249 - val_age_out_loss: 307.1268 - val_gender_out_accuracy: 0.8780 - val_ethnicity_out_accuracy: 0.6559 - val_age_out_mae: 14.2513
Epoch 23/39
1527/1527 [==============================] - 20s 13ms/step - loss: 80.0036 - gender_out_loss: 0.2135 - ethnicity_out_loss: 0.2660 - age_out_loss: 79.5242 - gender_out_accuracy: 0.9190 - ethnicity_out_accuracy: 0.9082 - age_out_mae: 6.7605 - val_loss: 335.2199 - val_gender_out_loss: 0.3104 - val_ethnicity_out_loss: 1.4996 - val_age_out_loss: 333.4098 - val_gender_out_accuracy: 0.8760 - val_ethnicity_out_accuracy: 0.6586 - val_age_out_mae: 14.7251
Epoch 24/39
1527/1527 [==============================] - 20s 13ms/step - loss: 75.6714 - gender_out_loss: 0.1988 - ethnicity_out_loss: 0.2421 - age_out_loss: 75.2306 - gender_out_accuracy: 0.9269 - ethnicity_out_accuracy: 0.9136 - age_out_mae: 6.5530 - val_loss: 333.7614 - val_gender_out_loss: 0.3399 - val_ethnicity_out_loss: 1.5639 - val_age_out_loss: 331.8575 - val_gender_out_accuracy: 0.8724 - val_ethnicity_out_accuracy: 0.6414 - val_age_out_mae: 14.3972
Epoch 25/39
1527/1527 [==============================] - 20s 13ms/step - loss: 74.5585 - gender_out_loss: 0.1878 - ethnicity_out_loss: 0.2218 - age_out_loss: 74.1490 - gender_out_accuracy: 0.9288 - ethnicity_out_accuracy: 0.9220 - age_out_mae: 6.5012 - val_loss: 291.8572 - val_gender_out_loss: 0.3270 - val_ethnicity_out_loss: 1.6501 - val_age_out_loss: 289.8800 - val_gender_out_accuracy: 0.8756 - val_ethnicity_out_accuracy: 0.6598 - val_age_out_mae: 13.6761
Epoch 26/39
1527/1527 [==============================] - 20s 13ms/step - loss: 68.8344 - gender_out_loss: 0.1703 - ethnicity_out_loss: 0.1949 - age_out_loss: 68.4693 - gender_out_accuracy: 0.9352 - ethnicity_out_accuracy: 0.9324 - age_out_mae: 6.2917 - val_loss: 238.3644 - val_gender_out_loss: 0.2999 - val_ethnicity_out_loss: 1.8298 - val_age_out_loss: 236.2348 - val_gender_out_accuracy: 0.8812 - val_ethnicity_out_accuracy: 0.6458 - val_age_out_mae: 12.1244
Epoch 27/39
1527/1527 [==============================] - 20s 13ms/step - loss: 67.9118 - gender_out_loss: 0.1639 - ethnicity_out_loss: 0.1807 - age_out_loss: 67.5672 - gender_out_accuracy: 0.9407 - ethnicity_out_accuracy: 0.9369 - age_out_mae: 6.2516 - val_loss: 257.4468 - val_gender_out_loss: 0.3106 - val_ethnicity_out_loss: 1.7296 - val_age_out_loss: 255.4065 - val_gender_out_accuracy: 0.8773 - val_ethnicity_out_accuracy: 0.6659 - val_age_out_mae: 12.6065
Epoch 28/39
1527/1527 [==============================] - 20s 13ms/step - loss: 63.7606 - gender_out_loss: 0.1593 - ethnicity_out_loss: 0.1673 - age_out_loss: 63.4341 - gender_out_accuracy: 0.9396 - ethnicity_out_accuracy: 0.9450 - age_out_mae: 6.0511 - val_loss: 324.6809 - val_gender_out_loss: 0.2995 - val_ethnicity_out_loss: 1.8620 - val_age_out_loss: 322.5195 - val_gender_out_accuracy: 0.8837 - val_ethnicity_out_accuracy: 0.6581 - val_age_out_mae: 14.5312
Epoch 29/39
1527/1527 [==============================] - 20s 13ms/step - loss: 60.4842 - gender_out_loss: 0.1484 - ethnicity_out_loss: 0.1540 - age_out_loss: 60.1818 - gender_out_accuracy: 0.9436 - ethnicity_out_accuracy: 0.9471 - age_out_mae: 5.8896 - val_loss: 400.2004 - val_gender_out_loss: 0.3272 - val_ethnicity_out_loss: 2.0286 - val_age_out_loss: 397.8447 - val_gender_out_accuracy: 0.8726 - val_ethnicity_out_accuracy: 0.6610 - val_age_out_mae: 16.3625
Epoch 30/39
1527/1527 [==============================] - 20s 13ms/step - loss: 58.6484 - gender_out_loss: 0.1363 - ethnicity_out_loss: 0.1421 - age_out_loss: 58.3701 - gender_out_accuracy: 0.9492 - ethnicity_out_accuracy: 0.9532 - age_out_mae: 5.8207 - val_loss: 382.1413 - val_gender_out_loss: 0.3128 - val_ethnicity_out_loss: 1.8928 - val_age_out_loss: 379.9355 - val_gender_out_accuracy: 0.8706 - val_ethnicity_out_accuracy: 0.6608 - val_age_out_mae: 15.9396
Epoch 31/39
1527/1527 [==============================] - 20s 13ms/step - loss: 55.2232 - gender_out_loss: 0.1272 - ethnicity_out_loss: 0.1297 - age_out_loss: 54.9664 - gender_out_accuracy: 0.9532 - ethnicity_out_accuracy: 0.9541 - age_out_mae: 5.6194 - val_loss: 294.0450 - val_gender_out_loss: 0.3015 - val_ethnicity_out_loss: 2.0969 - val_age_out_loss: 291.6467 - val_gender_out_accuracy: 0.8844 - val_ethnicity_out_accuracy: 0.6578 - val_age_out_mae: 13.7772
Epoch 32/39
1527/1527 [==============================] - 20s 13ms/step - loss: 58.4090 - gender_out_loss: 0.1300 - ethnicity_out_loss: 0.1464 - age_out_loss: 58.1325 - gender_out_accuracy: 0.9494 - ethnicity_out_accuracy: 0.9500 - age_out_mae: 5.7353 - val_loss: 231.2086 - val_gender_out_loss: 0.3044 - val_ethnicity_out_loss: 2.1325 - val_age_out_loss: 228.7718 - val_gender_out_accuracy: 0.8773 - val_ethnicity_out_accuracy: 0.6632 - val_age_out_mae: 11.6849
Epoch 33/39
1527/1527 [==============================] - 20s 13ms/step - loss: 51.9877 - gender_out_loss: 0.1153 - ethnicity_out_loss: 0.1234 - age_out_loss: 51.7490 - gender_out_accuracy: 0.9559 - ethnicity_out_accuracy: 0.9596 - age_out_mae: 5.4256 - val_loss: 241.7180 - val_gender_out_loss: 0.3828 - val_ethnicity_out_loss: 1.8858 - val_age_out_loss: 239.4494 - val_gender_out_accuracy: 0.8751 - val_ethnicity_out_accuracy: 0.6711 - val_age_out_mae: 12.2367
Epoch 34/39
1527/1527 [==============================] - 20s 13ms/step - loss: 50.0588 - gender_out_loss: 0.1024 - ethnicity_out_loss: 0.1111 - age_out_loss: 49.8454 - gender_out_accuracy: 0.9625 - ethnicity_out_accuracy: 0.9646 - age_out_mae: 5.3397 - val_loss: 308.7733 - val_gender_out_loss: 0.3202 - val_ethnicity_out_loss: 2.1557 - val_age_out_loss: 306.2971 - val_gender_out_accuracy: 0.8812 - val_ethnicity_out_accuracy: 0.6578 - val_age_out_mae: 13.9627
Epoch 35/39
1527/1527 [==============================] - 20s 13ms/step - loss: 48.9816 - gender_out_loss: 0.1016 - ethnicity_out_loss: 0.1145 - age_out_loss: 48.7655 - gender_out_accuracy: 0.9618 - ethnicity_out_accuracy: 0.9638 - age_out_mae: 5.2734 - val_loss: 319.1381 - val_gender_out_loss: 0.3480 - val_ethnicity_out_loss: 2.0137 - val_age_out_loss: 316.7763 - val_gender_out_accuracy: 0.8812 - val_ethnicity_out_accuracy: 0.6625 - val_age_out_mae: 14.2252
Epoch 36/39
1527/1527 [==============================] - 20s 13ms/step - loss: 45.4703 - gender_out_loss: 0.0889 - ethnicity_out_loss: 0.0973 - age_out_loss: 45.2841 - gender_out_accuracy: 0.9669 - ethnicity_out_accuracy: 0.9684 - age_out_mae: 5.1317 - val_loss: 367.5019 - val_gender_out_loss: 0.4171 - val_ethnicity_out_loss: 2.3470 - val_age_out_loss: 364.7378 - val_gender_out_accuracy: 0.8785 - val_ethnicity_out_accuracy: 0.6706 - val_age_out_mae: 15.4206
Epoch 37/39
1527/1527 [==============================] - 20s 13ms/step - loss: 44.0967 - gender_out_loss: 0.0833 - ethnicity_out_loss: 0.1111 - age_out_loss: 43.9023 - gender_out_accuracy: 0.9682 - ethnicity_out_accuracy: 0.9652 - age_out_mae: 5.0125 - val_loss: 248.8840 - val_gender_out_loss: 0.3735 - val_ethnicity_out_loss: 2.5951 - val_age_out_loss: 245.9155 - val_gender_out_accuracy: 0.8802 - val_ethnicity_out_accuracy: 0.6465 - val_age_out_mae: 12.2316
Epoch 38/39
1527/1527 [==============================] - 20s 13ms/step - loss: 42.0947 - gender_out_loss: 0.0817 - ethnicity_out_loss: 0.1091 - age_out_loss: 41.9039 - gender_out_accuracy: 0.9693 - ethnicity_out_accuracy: 0.9651 - age_out_mae: 4.9134 - val_loss: 391.2341 - val_gender_out_loss: 0.3490 - val_ethnicity_out_loss: 2.4152 - val_age_out_loss: 388.4701 - val_gender_out_accuracy: 0.8650 - val_ethnicity_out_accuracy: 0.6227 - val_age_out_mae: 15.8651
Epoch 39/39
1527/1527 [==============================] - 20s 13ms/step - loss: 41.9899 - gender_out_loss: 0.0792 - ethnicity_out_loss: 0.1032 - age_out_loss: 41.8076 - gender_out_accuracy: 0.9699 - ethnicity_out_accuracy: 0.9673 - age_out_mae: 4.8614 - val_loss: 296.8275 - val_gender_out_loss: 0.4096 - val_ethnicity_out_loss: 2.2288 - val_age_out_loss: 294.1893 - val_gender_out_accuracy: 0.8846 - val_ethnicity_out_accuracy: 0.6723 - val_age_out_mae: 13.6395
In [ ]:
model.save('ResNet_train.h5')

RETRAIN¶

In [ ]:
model1 = load_model('/content/drive/MyDrive/ResNet_train.h5')
In [ ]:
##Defining batch size and callbacks
batch_size = 12
epochs = 4
In [ ]:
#Training model
history = model1.fit(X_train, {'gender_out': y_gender_train, 'ethnicity_out': y_ethnicity_train, 'age_out': y_age_train},
                         batch_size=batch_size,
                         epochs = epochs, validation_data = (X_cv, [y_gender_cv, y_ethnicity_cv, y_age_cv]),
                         steps_per_epoch=(X_train.shape[0] // batch_size)
                         )
Epoch 1/4
1018/1018 [==============================] - 15s 15ms/step - loss: 40.9740 - gender_out_loss: 0.1205 - ethnicity_out_loss: 0.1284 - age_out_loss: 40.7251 - gender_out_accuracy: 0.9557 - ethnicity_out_accuracy: 0.9571 - age_out_mae: 4.8399 - val_loss: 84.7887 - val_gender_out_loss: 0.2307 - val_ethnicity_out_loss: 0.6722 - val_age_out_loss: 83.8858 - val_gender_out_accuracy: 0.9141 - val_ethnicity_out_accuracy: 0.8120 - val_age_out_mae: 6.6939
Epoch 2/4
1018/1018 [==============================] - 15s 15ms/step - loss: 38.4619 - gender_out_loss: 0.1065 - ethnicity_out_loss: 0.1119 - age_out_loss: 38.2435 - gender_out_accuracy: 0.9639 - ethnicity_out_accuracy: 0.9619 - age_out_mae: 4.6897 - val_loss: 58.3604 - val_gender_out_loss: 0.2846 - val_ethnicity_out_loss: 0.6888 - val_age_out_loss: 57.3870 - val_gender_out_accuracy: 0.9072 - val_ethnicity_out_accuracy: 0.8105 - val_age_out_mae: 5.5922
Epoch 3/4
1018/1018 [==============================] - 15s 15ms/step - loss: 35.8337 - gender_out_loss: 0.0976 - ethnicity_out_loss: 0.1078 - age_out_loss: 35.6283 - gender_out_accuracy: 0.9662 - ethnicity_out_accuracy: 0.9635 - age_out_mae: 4.5228 - val_loss: 64.0394 - val_gender_out_loss: 0.2339 - val_ethnicity_out_loss: 0.7240 - val_age_out_loss: 63.0814 - val_gender_out_accuracy: 0.9237 - val_ethnicity_out_accuracy: 0.8166 - val_age_out_mae: 5.8069
Epoch 4/4
1018/1018 [==============================] - 15s 15ms/step - loss: 34.3671 - gender_out_loss: 0.0856 - ethnicity_out_loss: 0.1047 - age_out_loss: 34.1769 - gender_out_accuracy: 0.9705 - ethnicity_out_accuracy: 0.9642 - age_out_mae: 4.4186 - val_loss: 59.2054 - val_gender_out_loss: 0.2463 - val_ethnicity_out_loss: 0.8078 - val_age_out_loss: 58.1512 - val_gender_out_accuracy: 0.9183 - val_ethnicity_out_accuracy: 0.8189 - val_age_out_mae: 5.5291
In [ ]:
model1.save('ResNet_retrain.h5')
In [ ]:
model2 = load_model('ResNet_retrain.h5')

RESNET FINAL Model¶

In [ ]:
#FINAL MODEL
model2.summary()
Model: "model"
__________________________________________________________________________________________________
 Layer (type)                   Output Shape         Param #     Connected to                     
==================================================================================================
 input_1 (InputLayer)           [(None, 50, 50, 3)]  0           []                               
                                                                                                  
 conv2d (Conv2D)                (None, 50, 50, 160)  4480        ['input_1[0][0]']                
                                                                                                  
 batch_normalization (BatchNorm  (None, 50, 50, 160)  640        ['conv2d[0][0]']                 
 alization)                                                                                       
                                                                                                  
 leaky_re_lu (LeakyReLU)        (None, 50, 50, 160)  0           ['batch_normalization[0][0]']    
                                                                                                  
 max_pooling2d (MaxPooling2D)   (None, 25, 25, 160)  0           ['leaky_re_lu[0][0]']            
                                                                                                  
 conv2d_1 (Conv2D)              (None, 25, 25, 192)  276672      ['max_pooling2d[0][0]']          
                                                                                                  
 batch_normalization_1 (BatchNo  (None, 25, 25, 192)  768        ['conv2d_1[0][0]']               
 rmalization)                                                                                     
                                                                                                  
 leaky_re_lu_1 (LeakyReLU)      (None, 25, 25, 192)  0           ['batch_normalization_1[0][0]']  
                                                                                                  
 average_pooling2d (AveragePool  (None, 12, 12, 192)  0          ['leaky_re_lu_1[0][0]']          
 ing2D)                                                                                           
                                                                                                  
 conv2d_2 (Conv2D)              (None, 12, 12, 224)  387296      ['average_pooling2d[0][0]']      
                                                                                                  
 batch_normalization_2 (BatchNo  (None, 12, 12, 224)  896        ['conv2d_2[0][0]']               
 rmalization)                                                                                     
                                                                                                  
 leaky_re_lu_2 (LeakyReLU)      (None, 12, 12, 224)  0           ['batch_normalization_2[0][0]']  
                                                                                                  
 average_pooling2d_1 (AveragePo  (None, 6, 6, 224)   0           ['leaky_re_lu_2[0][0]']          
 oling2D)                                                                                         
                                                                                                  
 conv2d_3 (Conv2D)              (None, 6, 6, 224)    451808      ['average_pooling2d_1[0][0]']    
                                                                                                  
 batch_normalization_3 (BatchNo  (None, 6, 6, 224)   896         ['conv2d_3[0][0]']               
 rmalization)                                                                                     
                                                                                                  
 leaky_re_lu_3 (LeakyReLU)      (None, 6, 6, 224)    0           ['batch_normalization_3[0][0]']  
                                                                                                  
 max_pooling2d_1 (MaxPooling2D)  (None, 3, 3, 224)   0           ['leaky_re_lu_3[0][0]']          
                                                                                                  
 conv2d_4 (Conv2D)              (None, 3, 3, 224)    451808      ['max_pooling2d_1[0][0]']        
                                                                                                  
 batch_normalization_4 (BatchNo  (None, 3, 3, 224)   896         ['conv2d_4[0][0]']               
 rmalization)                                                                                     
                                                                                                  
 leaky_re_lu_4 (LeakyReLU)      (None, 3, 3, 224)    0           ['batch_normalization_4[0][0]']  
                                                                                                  
 max_pooling2d_2 (MaxPooling2D)  (None, 1, 1, 224)   0           ['leaky_re_lu_4[0][0]']          
                                                                                                  
 flatten_1 (Flatten)            (None, 2016)         0           ['max_pooling2d_1[0][0]']        
                                                                                                  
 flatten (Flatten)              (None, 224)          0           ['max_pooling2d_2[0][0]']        
                                                                                                  
 flatten_2 (Flatten)            (None, 8064)         0           ['average_pooling2d_1[0][0]']    
                                                                                                  
 dense_2 (Dense)                (None, 896)          1807232     ['flatten_1[0][0]']              
                                                                                                  
 dense (Dense)                  (None, 896)          201600      ['flatten[0][0]']                
                                                                                                  
 dense_4 (Dense)                (None, 896)          7226240     ['flatten_2[0][0]']              
                                                                                                  
 dropout_1 (Dropout)            (None, 896)          0           ['dense_2[0][0]']                
                                                                                                  
 dropout (Dropout)              (None, 896)          0           ['dense[0][0]']                  
                                                                                                  
 dropout_2 (Dropout)            (None, 896)          0           ['dense_4[0][0]']                
                                                                                                  
 dense_3 (Dense)                (None, 896)          803712      ['dropout_1[0][0]']              
                                                                                                  
 dense_1 (Dense)                (None, 896)          803712      ['dropout[0][0]']                
                                                                                                  
 dense_5 (Dense)                (None, 896)          803712      ['dropout_2[0][0]']              
                                                                                                  
 gender_out (Dense)             (None, 1)            897         ['dense_3[0][0]']                
                                                                                                  
 ethnicity_out (Dense)          (None, 5)            4485        ['dense_1[0][0]']                
                                                                                                  
 age_out (Dense)                (None, 1)            897         ['dense_5[0][0]']                
                                                                                                  
==================================================================================================
Total params: 13,228,647
Trainable params: 13,226,599
Non-trainable params: 2,048
__________________________________________________________________________________________________

INFERENCE - OCCLUDED IMAGES¶

In [ ]:
pred = model2.predict(X_test)

#Accuracy in gender prediction
#Accuracy in ethnicity prediction
#Age mae in age prediction
test_loss,test_gender_loss, test_ethnicity_loss, test_age_loss, test_gender_acc,test_ethnicity_acc,test_age_mae = model2.evaluate(X_test, 
                                                                                   [y_gender_test, y_ethnicity_test, y_age_test], verbose=0)
print(f'\nTest gender accuracy: {test_gender_acc}')
print(f'\nTest ethnicity accuracy: {test_ethnicity_acc}')
print(f'\nTest age MAPE: {test_age_mae}')
128/128 [==============================] - 1s 7ms/step

Test gender accuracy: 0.9182621240615845

Test ethnicity accuracy: 0.8203240036964417

Test age MAPE: 5.537388801574707

ACTIVATION MAP GENERATION¶

In [ ]:
gender_weights = model2.layers[-3].get_weights()[0]
ethnicity_weights = model2.layers[-2].get_weights()[0]
age_weights = model2.layers[-1].get_weights()[0]

WE USE THE LAST CONVOLUTIONAL LAYER AND THE RESPECTIVE OUTPUT LAYERS OF THE MODEL FOR EACH ATTRIBUTE TO GENERATE AN ACTIVATION MAP

In [ ]:
gender_model  = Model(inputs=model2.input,outputs=(model2.layers[-19].output,model2.layers[-3].output))
ethnicity_model = Model(inputs=model2.input,outputs=(model2.layers[-19].output,model2.layers[-2].output))
age_model = Model(inputs=model2.input,outputs=(model2.layers[-19].output,model2.layers[-1].output))
In [ ]:
features_gender, results_gender = gender_model.predict(X_test)
features_ethnicity, results_ethnicity = ethnicity_model.predict(X_test)
features_age, results_age = age_model.predict(X_test)
128/128 [==============================] - 1s 6ms/step
128/128 [==============================] - 1s 6ms/step
128/128 [==============================] - 1s 6ms/step
In [ ]:
#CLASS ACTIVATION MAP

def activation_map(features, weights, results,x,y,z,att):
  
  for idx in range(x,y):

        features_for_one_img = features[idx,:,:,:]
        height_roomout = X_train.shape[1]/features_for_one_img.shape[0]
        width_roomout  = X_train.shape[2]/features_for_one_img.shape[1]
    
        cam_features = sp.ndimage.zoom(features_for_one_img, (height_roomout, width_roomout, z),order = 1)
        pred = np.argmax(results[idx])
        plt.figure(facecolor='white')
        cam_weights = weights[:,pred]
        cam_output  = np.dot(cam_features,cam_weights)
        fig, axs = plt.subplots(1, 2, figsize=(4, 4))
        plt.grid(False)
        plt.xticks([])
        plt.yticks([])
        if att=='eth':
          buf = 'Predicted Class = ' +str(y_test[idx][1])
        elif att=='gen':
          buf = 'Predicted Class = ' +str(y_test[idx][0]) 
        if att=='age':
          buf = 'Predicted Class = ' +str(y_test[idx][2])         
        # plot original image
        plt.xlabel(buf)
        axs[0].imshow(X_test[idx], alpha=0.5)
        axs[0].set_xlabel("Original Image")
        axs[0].grid(False)
        axs[0].set_axis_off()

        # plot activation map
        axs[1].imshow(np.squeeze(X_test[idx]), alpha=0.7)
        axs[1].imshow(cam_output, cmap='jet', alpha=0.5)
        axs[1].set_title("Class Activation Map")
        axs[1].grid(False)
        axs[0].set_axis_off()
        axs[1].tick_params(axis='both', which='both', length=0)  # Remove ticks

        plt.show()

        axs[0].tick_params(axis='both', which='both', length=0)  # Remove ticks

        # plot activation map
        axs[1].imshow(np.squeeze(X_test[idx]), alpha=0.7)
        axs[1].imshow(cam_output, cmap='jet', alpha=0.5)
        axs[1].set_title("Class Activation Map")
        axs[1].grid(False)
        axs[0].set_axis_off()
        axs[1].tick_params(axis='both', which='both', length=0)  # Remove ticks

        plt.show()
In [ ]:
#activation map to depict how the CNN learns to detect ethnicity
activation_map(features_ethnicity, ethnicity_weights, results_ethnicity,82,93,4,'eth')
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
In [ ]:
#activation map to depict how the CNN learns to detect gender
activation_map(features_gender, gender_weights, results_gender,32,43,4,'gen')
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
In [ ]:
#activation map to depict how the CNN learns to detect age
activation_map(features_age, age_weights, results_age,520,531,4,'age')
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>
<Figure size 640x480 with 0 Axes>

MODEL ARCHITECTURE¶

In [ ]:
# RESNET MODEL ARCHITECTURE
tf.keras.utils.plot_model(
    model2, to_file='model.png', show_shapes=True, show_layer_names=True,
    rankdir='TB', expand_nested=True, dpi=96
)
Out[ ]:

Predictions on test data¶

In [ ]:
#SHOWING THE RESULTS OF A SUBSET OF TEST DATA
for i in range(20, 55):
    plt.figure(figsize=(2, 2))
    gt_age = y_test[i][2]
    gt_ethnicity = y_test[i][1]
    gt_gender = y_test[i][0]
    print("GROUND TRUTH:")
    print("Gender:", gt_gender)
    print("Ethnicity:", gt_ethnicity)
    print("Age:", gt_age)
    plt.imshow(X_test[i], interpolation='nearest')
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.show()   
    pred_gender, pred_ethnicity, pred_age = model2.predict(X_test[i][np.newaxis])
    gender_acc = np.round(pred_gender)
    ethnicity_acc = np.mean(np.argmax(pred_ethnicity))
    age_mae = np.abs(pred_age)
    print("\nPREDICTED:")
    print("Gender:", gender_acc)
    print("Ethnicity:", ethnicity_acc)
    print("Age:", age_mae)    
    print("\n------------------------------------------------\n")
    
GROUND TRUTH:
Gender: 1
Ethnicity: 0
Age: 37
1/1 [==============================] - 0s 30ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 0.0
Age: [[38.045406]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 2
Age: 24
1/1 [==============================] - 0s 30ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 2.0
Age: [[16.828045]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 0
Age: 85
1/1 [==============================] - 0s 27ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 0.0
Age: [[100.509796]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 0
Age: 49
1/1 [==============================] - 0s 38ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 0.0
Age: [[44.58723]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 0
Age: 29
1/1 [==============================] - 0s 67ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 0.0
Age: [[28.4955]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 1
Age: 28
1/1 [==============================] - 0s 27ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 0.0
Age: [[22.19035]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 1
Age: 37
1/1 [==============================] - 0s 27ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 0.0
Age: [[33.94472]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 1
Age: 48
1/1 [==============================] - 0s 21ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 3.0
Age: [[32.198948]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 1
Age: 32
1/1 [==============================] - 0s 26ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 1.0
Age: [[36.15636]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 0
Age: 16
1/1 [==============================] - 0s 27ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 0.0
Age: [[18.048424]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 2
Age: 62
1/1 [==============================] - 0s 25ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 0.0
Age: [[44.389828]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 0
Age: 58
1/1 [==============================] - 0s 30ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 0.0
Age: [[62.15122]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 4
Age: 35
1/1 [==============================] - 0s 26ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 3.0
Age: [[32.169918]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 0
Age: 80
1/1 [==============================] - 0s 21ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 0.0
Age: [[61.740635]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 0
Age: 16
1/1 [==============================] - 0s 26ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 0.0
Age: [[18.968811]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 0
Age: 32
1/1 [==============================] - 0s 34ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 0.0
Age: [[25.56342]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 2
Age: 61
1/1 [==============================] - 0s 33ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 2.0
Age: [[63.137882]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 1
Age: 23
1/1 [==============================] - 0s 21ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 1.0
Age: [[20.67167]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 0
Age: 23
1/1 [==============================] - 0s 25ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 3.0
Age: [[21.377903]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 0
Age: 32
1/1 [==============================] - 0s 21ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 0.0
Age: [[41.62163]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 3
Age: 27
1/1 [==============================] - 0s 21ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 3.0
Age: [[33.646225]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 0
Age: 41
1/1 [==============================] - 0s 26ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 0.0
Age: [[43.556606]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 4
Age: 26
1/1 [==============================] - 0s 26ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 1.0
Age: [[24.228542]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 1
Age: 32
1/1 [==============================] - 0s 29ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 1.0
Age: [[37.387817]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 2
Age: 27
1/1 [==============================] - 0s 22ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 2.0
Age: [[29.479849]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 3
Age: 52
1/1 [==============================] - 0s 23ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 3.0
Age: [[34.524754]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 3
Age: 57
1/1 [==============================] - 0s 28ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 3.0
Age: [[54.820274]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 0
Age: 19
1/1 [==============================] - 0s 20ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 1.0
Age: [[19.117537]]

------------------------------------------------

GROUND TRUTH:
Gender: 0
Ethnicity: 3
Age: 5
1/1 [==============================] - 0s 29ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 3.0
Age: [[4.2297616]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 0
Age: 26
1/1 [==============================] - 0s 27ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 0.0
Age: [[28.880718]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 1
Age: 32
1/1 [==============================] - 0s 21ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 1.0
Age: [[28.647858]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 0
Age: 45
1/1 [==============================] - 0s 22ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 3.0
Age: [[28.842196]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 1
Age: 25
1/1 [==============================] - 0s 28ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 1.0
Age: [[26.471973]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 3
Age: 20
1/1 [==============================] - 0s 23ms/step

PREDICTED:
Gender: [[1.]]
Ethnicity: 3.0
Age: [[20.517118]]

------------------------------------------------

GROUND TRUTH:
Gender: 1
Ethnicity: 0
Age: 32
1/1 [==============================] - 0s 29ms/step

PREDICTED:
Gender: [[0.]]
Ethnicity: 0.0
Age: [[41.167397]]

------------------------------------------------

VIZUALIZATION AND EVALUATION(CONFUSION MATRIX, PRECISON, RECALL AND F1 SCORE)¶

In [ ]:
from sklearn.metrics import classification_report, confusion_matrix
In [ ]:
pred = model2.predict(X_test)
128/128 [==============================] - 1s 8ms/step
In [ ]:
#CONFUSION MATRIX - GENDER
Y_pred_gender = np.round(pred[0], 2)
Y_true_gender = y_gender_test
print('Confusion Matrix')
cm = confusion_matrix(Y_true_gender,np.round(pred[0]))
sns.heatmap(cm, annot=True, fmt='d', cbar=False, cmap='Greens');
target_names = ['Male', 'Female']
print(classification_report(Y_true_gender, np.round(pred[0]), target_names=target_names))
Confusion Matrix
              precision    recall  f1-score   support

        Male       0.89      0.96      0.93      2140
      Female       0.95      0.87      0.91      1934

    accuracy                           0.92      4074
   macro avg       0.92      0.92      0.92      4074
weighted avg       0.92      0.92      0.92      4074

In [ ]:
#CONFUSION MATRIX - ETHNICITY
#0 - represents white, 1 - represents black,2 - represents asian,3 - represents indian and 4 - represents others
Y_pred_Ethn = np.argmax(pred[1],axis=1)
Y_true_ethnicity = np.argmax(y_ethnicity_test,axis = 1)
cm = confusion_matrix(Y_true_ethnicity,Y_pred_Ethn)
sns.heatmap(cm, annot=True, fmt='d', cbar=False, cmap='Greens');
target_names = ['white', 'black','asian','indian','others']
print(classification_report(Y_true_ethnicity, Y_pred_Ethn, target_names=target_names))
              precision    recall  f1-score   support

       white       0.85      0.90      0.87      1802
       black       0.87      0.81      0.84       832
       asian       0.87      0.77      0.81       478
      indian       0.70      0.85      0.76       690
      others       0.74      0.37      0.50       272

    accuracy                           0.82      4074
   macro avg       0.81      0.74      0.76      4074
weighted avg       0.82      0.82      0.82      4074

In [ ]:
#CONFUSION MATRIX - AGE
plt.figure(figsize=(8, 5))
plt.scatter(pred[2], y_age_test)
plt.plot(pred[2], pred[2], color='red')  # Add a regression line
plt.xlabel('Predicted Values')
plt.ylabel('Actual Values')
plt.title('Scatter Plot with Regression Line')
plt.show()

EXPERIMENTS¶

1. TEST ON NON MASKED IMAGES¶

In [ ]:
groundtruth = np.empty((0, 50, 50, 3), dtype=np.float32)
df_nonmasked = pd.DataFrame(columns=['Age', 'Gender', 'Ethnicity'])
for filename in os.listdir('/content/drive/MyDrive/new/'):
      img = cv2.imread('/content/drive/MyDrive/new/'+ str(filename))
      img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)      
      resized_img = cv2.resize(img, (50, 50), interpolation=cv2.INTER_LINEAR)
      flattened_img = resized_img.flatten()
      normalized_img = (flattened_img - flattened_img.min()) / (flattened_img.max() - flattened_img.min())
      normalized_img = normalized_img.reshape(1,50,50,3)
      groundtruth = np.append(groundtruth, normalized_img, axis=0)
      age, gender, ethnicity = filename.split("_")[:3]
      df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
truth_values = np.array(df_nonmasked[['Gender', 'Ethnicity', 'Age']])
# Segregating the labels into different arrays
y_gender_test_1 = truth_values[:,0]
y_ethnicity_test_1 = truth_values[:,1]
y_age_test_1 = truth_values[:,2]
y_gender_test_1 = y_gender_test_1.astype(int)
y_age_test_1 = y_age_test_1.astype(int)
y_ethnicity_1 = to_categorical(y_ethnicity_test_1)
y_ethnicity_test_1 = y_ethnicity_1
<ipython-input-21-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-21-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-21-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-21-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-21-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-21-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-21-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-21-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-21-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-21-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-21-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-21-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-21-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-21-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-21-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-21-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-21-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-21-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-21-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
<ipython-input-21-c99b10c693a4>:12: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
  df_nonmasked = df_nonmasked.append({'Age': int(age), 'Gender': int(gender), 'Ethnicity': int(ethnicity)}, ignore_index=True)
In [ ]:
pred = model2.predict(groundtruth)

#Accuracy in gender prediction
#Accuracy in ethnicity prediction
#Age mae in age prediction
test_loss,test_gender_loss, test_ethnicity_loss, test_age_loss, test_gender_acc,test_ethnicity_acc,test_age_mae = model2.evaluate(groundtruth, 
                                                                                    [y_gender_test_1, y_ethnicity_test_1, y_age_test_1], verbose=0)
print(f'\nTest gender accuracy: {test_gender_acc}')
print(f'\nTest ethnicity accuracy: {test_ethnicity_acc}')
print(f'\nTest age MAPE: {test_age_mae}')
1/1 [==============================] - 0s 184ms/step

Test gender accuracy: 0.949999988079071

Test ethnicity accuracy: 0.699999988079071

Test age MAPE: 5.635373115539551
In [ ]:
i = 1
for filename in os.listdir('/content/drive/MyDrive/new/'):
      img = cv2.imread('/content/drive/MyDrive/new/'+ str(filename))
      img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
      age,gender,ethnicity = filename.split("_")[:3]
      print(i,".","GROUND TRUTH:")
      print("gender :", gender)
      print("ethnicity:", ethnicity)
      print("age:", age,"\n")
      # mg = np.mean(color_img, axis=2)
      resized_img = cv2.resize(img, (50, 50), interpolation=cv2.INTER_LINEAR)
      plt.figure(figsize=(2, 2))
      plt.imshow(img, interpolation='nearest')
      plt.grid(False)
      plt.xticks([])
      plt.yticks([])
      plt.show()
      flattened_img = resized_img.flatten()
      normalized_img = (flattened_img - flattened_img.min()) / (flattened_img.max() - flattened_img.min())
      normalized_img = normalized_img.reshape(1,50,50,3)
      x = normalized_img[0]
      pred_gender, pred_ethnicity, pred_age = model2.predict(x[np.newaxis])
      gender_acc = np.round(pred_gender)
      ethnicity_acc = np.mean(np.argmax(pred_ethnicity))
      age_mae = np.abs(pred_age)
      i = i+1
      print("PREDICTED:")
      print("gender :", gender_acc[0])
      print("ethnicity:", ethnicity_acc)
      print("age:", age_mae[0])
      print("---------------------------------------\n")
      
      
1 . GROUND TRUTH:
gender : 1
ethnicity: 3
age: 26 

1/1 [==============================] - 0s 20ms/step
PREDICTED:
gender : [1.]
ethnicity: 3.0
age: [22.238214]
---------------------------------------

2 . GROUND TRUTH:
gender : 0
ethnicity: 3
age: 27 

1/1 [==============================] - 0s 26ms/step
PREDICTED:
gender : [0.]
ethnicity: 3.0
age: [27.102823]
---------------------------------------

3 . GROUND TRUTH:
gender : 1
ethnicity: 3
age: 27 

1/1 [==============================] - 0s 27ms/step
PREDICTED:
gender : [1.]
ethnicity: 3.0
age: [24.186087]
---------------------------------------

4 . GROUND TRUTH:
gender : 0
ethnicity: 1
age: 42 

1/1 [==============================] - 0s 30ms/step
PREDICTED:
gender : [0.]
ethnicity: 1.0
age: [29.44516]
---------------------------------------

5 . GROUND TRUTH:
gender : 0
ethnicity: 2
age: 42 

1/1 [==============================] - 0s 38ms/step
PREDICTED:
gender : [0.]
ethnicity: 0.0
age: [37.840855]
---------------------------------------

6 . GROUND TRUTH:
gender : 0
ethnicity: 0
age: 45 

1/1 [==============================] - 0s 29ms/step
PREDICTED:
gender : [0.]
ethnicity: 0.0
age: [46.253906]
---------------------------------------

7 . GROUND TRUTH:
gender : 0
ethnicity: 1
age: 45 

1/1 [==============================] - 0s 28ms/step
PREDICTED:
gender : [0.]
ethnicity: 1.0
age: [42.55917]
---------------------------------------

8 . GROUND TRUTH:
gender : 0
ethnicity: 3
age: 45 

1/1 [==============================] - 0s 21ms/step
PREDICTED:
gender : [0.]
ethnicity: 3.0
age: [38.13035]
---------------------------------------

9 . GROUND TRUTH:
gender : 1
ethnicity: 0
age: 45 

1/1 [==============================] - 0s 26ms/step
PREDICTED:
gender : [1.]
ethnicity: 0.0
age: [31.255878]
---------------------------------------

10 . GROUND TRUTH:
gender : 0
ethnicity: 1
age: 46 

1/1 [==============================] - 0s 20ms/step
PREDICTED:
gender : [0.]
ethnicity: 3.0
age: [44.244442]
---------------------------------------

11 . GROUND TRUTH:
gender : 1
ethnicity: 0
age: 53 

1/1 [==============================] - 0s 27ms/step
PREDICTED:
gender : [1.]
ethnicity: 0.0
age: [43.7983]
---------------------------------------

12 . GROUND TRUTH:
gender : 0
ethnicity: 0
age: 55 

1/1 [==============================] - 0s 28ms/step
PREDICTED:
gender : [1.]
ethnicity: 0.0
age: [69.75249]
---------------------------------------

13 . GROUND TRUTH:
gender : 1
ethnicity: 2
age: 6 

1/1 [==============================] - 0s 23ms/step
PREDICTED:
gender : [1.]
ethnicity: 1.0
age: [8.790279]
---------------------------------------

14 . GROUND TRUTH:
gender : 1
ethnicity: 1
age: 70 

1/1 [==============================] - 0s 26ms/step
PREDICTED:
gender : [1.]
ethnicity: 0.0
age: [73.21848]
---------------------------------------

15 . GROUND TRUTH:
gender : 0
ethnicity: 1
age: 75 

1/1 [==============================] - 0s 20ms/step
PREDICTED:
gender : [0.]
ethnicity: 0.0
age: [64.49116]
---------------------------------------

16 . GROUND TRUTH:
gender : 0
ethnicity: 3
age: 75 

1/1 [==============================] - 0s 26ms/step
PREDICTED:
gender : [0.]
ethnicity: 3.0
age: [66.41054]
---------------------------------------

17 . GROUND TRUTH:
gender : 0
ethnicity: 0
age: 80 

1/1 [==============================] - 0s 20ms/step
PREDICTED:
gender : [0.]
ethnicity: 0.0
age: [83.888275]
---------------------------------------

18 . GROUND TRUTH:
gender : 1
ethnicity: 4
age: 9 

1/1 [==============================] - 0s 25ms/step
PREDICTED:
gender : [1.]
ethnicity: 4.0
age: [12.765714]
---------------------------------------

19 . GROUND TRUTH:
gender : 1
ethnicity: 2
age: 44 

1/1 [==============================] - 0s 25ms/step
PREDICTED:
gender : [1.]
ethnicity: 2.0
age: [39.49533]
---------------------------------------

20 . GROUND TRUTH:
gender : 1
ethnicity: 0
age: 31 

1/1 [==============================] - 0s 26ms/step
PREDICTED:
gender : [1.]
ethnicity: 3.0
age: [33.030975]
---------------------------------------

2. Testing on Real-time occluded facial images - ROF dataset¶

To evaluate the model’s performance on facial images that do not have superimposed occlusions, we used a subset of the “Real World Occluded Faces (ROF)” dataset.

In [ ]:
for filename in os.listdir('/content/drive/MyDrive/masked/'):
  path = '/content/drive/MyDrive/masked/'+str(filename)+'/'
  # print(path)
  for f in os.listdir(path):
      # print(f)
      img = cv2.imread(path+ str(f))
      img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
      print(filename[7:])
      # img = np.mean(color_img, axis=2)
      resized_img = cv2.resize(img, (50, 50), interpolation=cv2.INTER_LINEAR)
      age,gender,ethnicity = filename.split("_")[:3]
      print("GROUND TRUTH:")
      print("age:",age)
      print("gender :", gender)
      print("ethnicity:", ethnicity)
      plt.figure(figsize=(2, 2))
      plt.imshow(img, interpolation='nearest')
      plt.grid(False)
      plt.xticks([])
      plt.yticks([])
      plt.show()
      flattened_img = resized_img.flatten()
      normalized_img = (flattened_img - flattened_img.min()) / (flattened_img.max() - flattened_img.min())
      normalized_img = normalized_img.reshape(1,50,50,3)
      x = normalized_img[0]
      pred_gender, pred_ethnicity, pred_age = model2.predict(x[np.newaxis])
      gender_acc = np.round(pred_gender)
      ethnicity_acc = np.mean(np.argmax(pred_ethnicity))
      age_mae = np.abs(pred_age)
      print("PREDICTED:")
      print("age:", age_mae[0])
      print("gender :", gender_acc[0])
      print("ethnicity:", ethnicity_acc)

      print("---------------------------------------\n")
thomas_muller_wearing_mask
GROUND TRUTH:
age: 33
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 22ms/step
PREDICTED:
age: [34.856518]
gender : [0.]
ethnicity: 0.0
---------------------------------------

patty_murray_wearing_mask
GROUND TRUTH:
age: 72
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 28ms/step
PREDICTED:
age: [62.191677]
gender : [0.]
ethnicity: 0.0
---------------------------------------

naomi_osaka_wearing_mask
GROUND TRUTH:
age: 26
gender : 1
ethnicity: 2
1/1 [==============================] - 0s 22ms/step
PREDICTED:
age: [27.595806]
gender : [1.]
ethnicity: 2.0
---------------------------------------

justin_trudeau_wearing_mask
GROUND TRUTH:
age: 52
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 27ms/step
PREDICTED:
age: [46.24346]
gender : [0.]
ethnicity: 0.0
---------------------------------------

kamala_haris_wearing_mask
GROUND TRUTH:
age: 60
gender : 1
ethnicity: 3
1/1 [==============================] - 0s 28ms/step
PREDICTED:
age: [39.141758]
gender : [1.]
ethnicity: 2.0
---------------------------------------

lewis_hamilton_wearing_mask
GROUND TRUTH:
age: 40
gender : 0
ethnicity: 1
1/1 [==============================] - 0s 29ms/step
PREDICTED:
age: [31.239677]
gender : [0.]
ethnicity: 0.0
---------------------------------------

gary_peters_wearing_sunglasses
GROUND TRUTH:
age: 64
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 29ms/step
PREDICTED:
age: [54.20502]
gender : [1.]
ethnicity: 0.0
---------------------------------------

jean_castex_wearing_mask
GROUND TRUTH:
age: 60
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 28ms/step
PREDICTED:
age: [58.57859]
gender : [0.]
ethnicity: 3.0
---------------------------------------

jayson_tatum_wearing_mask
GROUND TRUTH:
age: 23
gender : 0
ethnicity: 1
1/1 [==============================] - 0s 28ms/step
PREDICTED:
age: [14.819469]
gender : [0.]
ethnicity: 0.0
---------------------------------------

immanuel_macron_wearing_mask
GROUND TRUTH:
age: 46
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 27ms/step
PREDICTED:
age: [41.25803]
gender : [0.]
ethnicity: 0.0
---------------------------------------

george_russell_wearing_mask
GROUND TRUTH:
age: 26
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 21ms/step
PREDICTED:
age: [25.097534]
gender : [0.]
ethnicity: 0.0
---------------------------------------

debbie_stabenow_wearing_mask
GROUND TRUTH:
age: 73
gender : 1
ethnicity: 1
1/1 [==============================] - 0s 22ms/step
PREDICTED:
age: [65.29673]
gender : [0.]
ethnicity: 0.0
---------------------------------------

diego_schwartzman_wearing_mask
GROUND TRUTH:
age: 33
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 29ms/step
PREDICTED:
age: [16.143692]
gender : [0.]
ethnicity: 2.0
---------------------------------------

bill_gates_wearing_mask
GROUND TRUTH:
age: 67
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 30ms/step
PREDICTED:
age: [49.584934]
gender : [0.]
ethnicity: 0.0
---------------------------------------

bruno_fernandes_wearing_mask
GROUND TRUTH:
age: 45
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 24ms/step
PREDICTED:
age: [39.678963]
gender : [0.]
ethnicity: 1.0
---------------------------------------

carlos_sainz_wearing_mask
GROUND TRUTH:
age: 30
gender : 0
ethnicity: 4
1/1 [==============================] - 0s 23ms/step
PREDICTED:
age: [38.603924]
gender : [0.]
ethnicity: 0.0
---------------------------------------

charles_leclerc_wearing_mask
GROUND TRUTH:
age: 25
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 25ms/step
PREDICTED:
age: [38.439857]
gender : [1.]
ethnicity: 0.0
---------------------------------------

chuck_schumer_wearing_mask
GROUND TRUTH:
age: 71
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 23ms/step
PREDICTED:
age: [63.156677]
gender : [1.]
ethnicity: 0.0
---------------------------------------

alexander_zverev_wearing_mask
GROUND TRUTH:
age: 36
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 30ms/step
PREDICTED:
age: [25.654848]
gender : [0.]
ethnicity: 0.0
---------------------------------------

evangeline_lilly_wearing_sunglasses
GROUND TRUTH:
age: 45
gender : 1
ethnicity: 0
1/1 [==============================] - 0s 23ms/step
PREDICTED:
age: [34.360504]
gender : [1.]
ethnicity: 0.0
---------------------------------------

emily_blunt_wearing_sunglasses
GROUND TRUTH:
age: 38
gender : 1
ethnicity: 0
1/1 [==============================] - 0s 30ms/step
PREDICTED:
age: [20.562706]
gender : [1.]
ethnicity: 0.0
---------------------------------------

chris_hemsworth_wearing_sunglasses
GROUND TRUTH:
age: 32
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 32ms/step
PREDICTED:
age: [9.4158745]
gender : [0.]
ethnicity: 0.0
---------------------------------------

christian_bale_wearing_sunglasses
GROUND TRUTH:
age: 50
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 37ms/step
PREDICTED:
age: [58.701115]
gender : [0.]
ethnicity: 0.0
---------------------------------------

bruce_willis_wearing_sunglasses
GROUND TRUTH:
age: 68
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 20ms/step
PREDICTED:
age: [50.6321]
gender : [0.]
ethnicity: 0.0
---------------------------------------

gary_peters_mask
GROUND TRUTH:
age: 59
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 24ms/step
PREDICTED:
age: [54.334972]
gender : [0.]
ethnicity: 0.0
---------------------------------------

adrien_brody_wearing_sunglasses
GROUND TRUTH:
age: 48
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 27ms/step
PREDICTED:
age: [43.86658]
gender : [0.]
ethnicity: 0.0
---------------------------------------

anthony_mackie_wearing_sunglasses
GROUND TRUTH:
age: 44
gender : 0
ethnicity: 1
1/1 [==============================] - 0s 31ms/step
PREDICTED:
age: [49.658413]
gender : [0.]
ethnicity: 0.0
---------------------------------------

benedict_cumberbatch_wearing_sunglasses
GROUND TRUTH:
age: 41
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 32ms/step
PREDICTED:
age: [33.07537]
gender : [0.]
ethnicity: 0.0
---------------------------------------

arnold_schwarzenegger_wearing_sunglasses
GROUND TRUTH:
age: 75
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 29ms/step
PREDICTED:
age: [50.854046]
gender : [0.]
ethnicity: 0.0
---------------------------------------

brad_pitt_wearing_sunglasses
GROUND TRUTH:
age: 52
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 31ms/step
PREDICTED:
age: [41.90174]
gender : [0.]
ethnicity: 0.0
---------------------------------------

brie_larson_wearing_sunglasses
GROUND TRUTH:
age: 30
gender : 1
ethnicity: 0
1/1 [==============================] - 0s 31ms/step
PREDICTED:
age: [20.949678]
gender : [1.]
ethnicity: 0.0
---------------------------------------

ben_mendelsohn_wearing_sunglasses
GROUND TRUTH:
age: 63
gender : 0
ethnicity: 0
1/1 [==============================] - 0s 30ms/step
PREDICTED:
age: [69.258415]
gender : [0.]
ethnicity: 0.0
---------------------------------------